unsigned long addr,
struct xen_ia64_privcmd_range* privcmd_range,
int i,
- unsigned long mfn,
+ unsigned long gmfn,
pgprot_t prot,
domid_t domid)
{
unsigned long gpfn;
unsigned long flags;
- if ((addr & ~PAGE_MASK) != 0 || mfn == INVALID_MFN) {
+ if ((addr & ~PAGE_MASK) != 0 || gmfn == INVALID_MFN) {
error = -EINVAL;
goto out;
}
if (pgprot_val(prot) == PROT_READ) {
flags = ASSIGN_readonly;
}
- error = HYPERVISOR_add_physmap(gpfn, mfn, flags, domid);
+ error = HYPERVISOR_add_physmap_with_gmfn(gpfn, gmfn, flags, domid);
if (error != 0) {
goto out;
}
int
direct_remap_pfn_range(struct vm_area_struct *vma,
unsigned long address, // process virtual address
- unsigned long mfn, // mfn, mfn + 1, ... mfn + size/PAGE_SIZE
+ unsigned long gmfn, // gmfn, gmfn + 1, ... gmfn + size/PAGE_SIZE
unsigned long size,
pgprot_t prot,
domid_t domid) // target domain
i = (address - vma->vm_start) >> PAGE_SHIFT;
for (offset = 0; offset < size; offset += PAGE_SIZE) {
- error = xen_ia64_privcmd_entry_mmap(vma, (address + offset) & PAGE_MASK, privcmd_range, entry_offset + i, mfn, prot, domid);
+ error = xen_ia64_privcmd_entry_mmap(vma, (address + offset) & PAGE_MASK, privcmd_range, entry_offset + i, gmfn, prot, domid);
if (error != 0) {
break;
}
i++;
- mfn++;
+ gmfn++;
}
return error;
return ret;
}
+/*
+ * Raw hypercall: IA64_DOM0VP_add_physmap_with_gmfn.
+ * Like IA64_DOM0VP_add_physmap but identifies the source page by guest
+ * machine frame number (gmfn) rather than a raw mfn; the hypervisor
+ * performs the gmfn->mfn translation for the target domain.
+ * Returns the hypervisor status value (0 on success).
+ */
+static inline unsigned long
+__HYPERVISOR_add_physmap_with_gmfn(unsigned long gpfn, unsigned long gmfn,
+ unsigned long flags, domid_t domid)
+{
+ return _hypercall5(unsigned long, ia64_dom0vp_op,
+ IA64_DOM0VP_add_physmap_with_gmfn,
+ gpfn, gmfn, flags, domid);
+}
+
+/*
+ * Checked wrapper around __HYPERVISOR_add_physmap_with_gmfn.
+ * Only meaningful when running on Xen; the BUG_ON makes a non-Xen
+ * caller fatal, so the is_running_on_xen() guard below is effectively
+ * dead code kept for symmetry with the other HYPERVISOR_* wrappers.
+ */
+static inline unsigned long
+HYPERVISOR_add_physmap_with_gmfn(unsigned long gpfn, unsigned long gmfn,
+ unsigned long flags, domid_t domid)
+{
+ unsigned long ret = 0;
+ BUG_ON(!is_running_on_xen()); // XXX(review): non-Xen path unreachable
+ if (is_running_on_xen()) {
+ ret = __HYPERVISOR_add_physmap_with_gmfn(gpfn, gmfn,
+ flags, domid);
+ }
+ return ret;
+}
+
+
#ifdef CONFIG_XEN_IA64_EXPOSE_P2M
static inline unsigned long
HYPERVISOR_expose_p2m(unsigned long conv_start_gpfn,
start_info_t *start_info;
unsigned long start_info_mpa;
struct xen_ia64_boot_param *bp;
+#if 0 // see comment below
shared_info_t *shared_info;
+#endif
int i;
DECLARE_DOMCTL;
int rc;
(load_funcs.loadimage)(image, image_size, xc_handle, dom,
page_array + start_page, &dsi);
- *store_mfn = page_array[nr_pages - 2];
- *console_mfn = page_array[nr_pages - 1];
+ *store_mfn = page_array[nr_pages - 2]; //XXX
+ *console_mfn = page_array[nr_pages - 1]; //XXX
IPRINTF("start_info: 0x%lx at 0x%lx, "
"store_mfn: 0x%lx at 0x%lx, "
"console_mfn: 0x%lx at 0x%lx\n",
ctxt->user_regs.r28 = start_info_mpa + sizeof (start_info_t);
munmap(start_info, PAGE_SIZE);
+#if 0
+ /*
+ * XXX FIXME:
+ * The following initialization is done by XEN_DOMCTL_arch_setup as
+ * a workaround.
+ * Should XENMEM_add_to_physmap with XENMAPSPACE_shared_info be used?
+ */
+
/* shared_info page starts its life empty. */
shared_info = xc_map_foreign_range(
xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE, shared_info_frame);
shared_info->arch.start_info_pfn = nr_pages - 3;
munmap(shared_info, PAGE_SIZE);
-
+#endif
free(page_array);
return 0;
for_each_vcpu (d, v)
v->arch.breakimm = d->arch.breakimm;
}
+#if 1
+ /*
+ * XXX FIXME
+ * see comment around shared_info in setup_guest() in
+ * libxc/xc_linux_build.c
+ */
+ {
+ int i;
+ d->shared_info->arch.start_info_pfn = ds->maxmem >> PAGE_SHIFT;
+ for_each_cpu(i)
+ d->shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
+ }
+#endif
}
}
ret = dom0vp_add_physmap(d, arg0, arg1, (unsigned int)arg2,
(domid_t)arg3);
break;
+ case IA64_DOM0VP_add_physmap_with_gmfn:
+ ret = dom0vp_add_physmap_with_gmfn(d, arg0, arg1, (unsigned int)arg2,
+ (domid_t)arg3);
+ break;
case IA64_DOM0VP_expose_p2m:
ret = dom0vp_expose_p2m(d, arg0, arg1, arg2, arg3);
break;
return 0;
}
-unsigned long
-dom0vp_add_physmap(struct domain* d, unsigned long gpfn, unsigned long mfn,
- unsigned long flags, domid_t domid)
+static unsigned long
+__dom0vp_add_physmap(struct domain* d, unsigned long gpfn,
+ unsigned long mfn_or_gmfn,
+ unsigned long flags, domid_t domid, int is_gmfn)
{
- int error = 0;
+ int error = -EINVAL;
struct domain* rd;
+ unsigned long mfn;
/* Not allowed by a domain. */
if (flags & (ASSIGN_nocache | ASSIGN_pgc_allocated))
break;
default:
gdprintk(XENLOG_INFO, "d 0x%p domid %d "
- "pgfn 0x%lx mfn 0x%lx flags 0x%lx domid %d\n",
- d, d->domain_id, gpfn, mfn, flags, domid);
+ "pgfn 0x%lx mfn_or_gmfn 0x%lx flags 0x%lx domid %d\n",
+ d, d->domain_id, gpfn, mfn_or_gmfn, flags, domid);
return -ESRCH;
}
BUG_ON(rd == NULL);
get_knownalive_domain(rd);
}
- if (unlikely(rd == d || !mfn_valid(mfn))) {
- error = -EINVAL;
+ if (unlikely(rd == d))
goto out1;
- }
- if (unlikely(get_page(mfn_to_page(mfn), rd) == 0)) {
- error = -EINVAL;
+ if (is_gmfn) {
+ if (domid == DOMID_XEN || domid == DOMID_IO)
+ goto out1;
+ mfn = gmfn_to_mfn(rd, mfn_or_gmfn);
+ } else
+ mfn = mfn_or_gmfn;
+ if (unlikely(!mfn_valid(mfn) || get_page(mfn_to_page(mfn), rd) == 0))
goto out1;
- }
+
+ error = 0;
BUG_ON(page_get_owner(mfn_to_page(mfn)) == d &&
get_gpfn_from_mfn(mfn) != INVALID_M2P_ENTRY);
assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn, flags);
return error;
}
+/*
+ * Public entry point: add a physmap entry identified by a raw mfn.
+ * Thin wrapper delegating to __dom0vp_add_physmap with is_gmfn == 0.
+ */
+unsigned long
+dom0vp_add_physmap(struct domain* d, unsigned long gpfn, unsigned long mfn,
+ unsigned long flags, domid_t domid)
+{
+ return __dom0vp_add_physmap(d, gpfn, mfn, flags, domid, 0);
+}
+
+/*
+ * Public entry point: add a physmap entry identified by a guest mfn.
+ * Delegates to __dom0vp_add_physmap with is_gmfn == 1, which rejects
+ * DOMID_XEN/DOMID_IO and translates via gmfn_to_mfn(rd, gmfn).
+ */
+unsigned long
+dom0vp_add_physmap_with_gmfn(struct domain* d, unsigned long gpfn,
+ unsigned long gmfn, unsigned long flags,
+ domid_t domid)
+{
+ return __dom0vp_add_physmap(d, gpfn, gmfn, flags, domid, 1);
+}
+
#ifdef CONFIG_XEN_IA64_EXPOSE_P2M
static struct page_info* p2m_pte_zero_page = NULL;
extern unsigned long do_dom0vp_op(unsigned long cmd, unsigned long arg0, unsigned long arg1, unsigned long arg2, unsigned long arg3);
extern unsigned long dom0vp_zap_physmap(struct domain *d, unsigned long gpfn, unsigned int extent_order);
extern unsigned long dom0vp_add_physmap(struct domain* d, unsigned long gpfn, unsigned long mfn, unsigned long flags, domid_t domid);
+extern unsigned long dom0vp_add_physmap_with_gmfn(struct domain* d, unsigned long gpfn, unsigned long gmfn, unsigned long flags, domid_t domid);
#ifdef CONFIG_XEN_IA64_EXPOSE_P2M
extern void expose_p2m_init(void);
extern unsigned long dom0vp_expose_p2m(struct domain* d, unsigned long conv_start_gpfn, unsigned long assign_start_gpfn, unsigned long expose_size, unsigned long granule_pfn);
/* xen perfmon */
#define IA64_DOM0VP_perfmon 8
+/* gmfn version of IA64_DOM0VP_add_physmap */
+#define IA64_DOM0VP_add_physmap_with_gmfn 9
+
// flags for page assignement to pseudo physical address space
#define _ASSIGN_readonly 0
#define ASSIGN_readonly (1UL << _ASSIGN_readonly)